return -1;
}
+void hvm_hlt(unsigned long rflags)
+{
+ struct vcpu *v = current;
+ struct periodic_time *pt = &v->domain->arch.hvm_domain.pl_time.periodic_tm;
+ s_time_t next_pit = -1, next_wakeup;
+
+ /*
+ * Detect machine shutdown. Only do this for vcpu 0, to avoid potentially
+ * shutting down the domain early. If we halt with interrupts disabled,
+ * that's a pretty sure sign that we want to shut down. In a real
+ * processor, NMIs are the only way to break out of this.
+ */
+ if ( (v->vcpu_id == 0) && !(rflags & X86_EFLAGS_IF) )
+ {
+ printk("D%d: HLT with interrupts disabled -- shutting down.\n",
+ current->domain->domain_id);
+ domain_shutdown(current->domain, SHUTDOWN_poweroff);
+ return;
+ }
+
+ if ( !v->vcpu_id )
+ next_pit = get_scheduled(v, pt->irq, pt);
+ next_wakeup = get_apictime_scheduled(v);
+ if ( (next_pit != -1 && next_pit < next_wakeup) || next_wakeup == -1 )
+ next_wakeup = next_pit;
+ if ( next_wakeup != -1 )
+ set_timer(&current->arch.hvm_vcpu.hlt_timer, next_wakeup);
+ do_sched_op_compat(SCHEDOP_block, 0);
+}
+
/*
* Copy from/to guest virtual.
*/
destroy_vmcb(&v->arch.hvm_svm);
free_monitor_pagetable(v);
- kill_timer(&v->arch.hvm_svm.hlt_timer);
+ kill_timer(&v->arch.hvm_vcpu.hlt_timer);
if ( hvm_apic_support(v->domain) && (VLAPIC(v) != NULL) )
{
kill_timer( &(VLAPIC(v)->vlapic_timer) );
if ( pt->enabled ) {
migrate_timer( &pt->timer, v->processor );
- migrate_timer( &v->arch.hvm_svm.hlt_timer, v->processor );
+ migrate_timer( &v->arch.hvm_vcpu.hlt_timer, v->processor );
}
if ( hvm_apic_support(v->domain) && VLAPIC( v ))
migrate_timer( &(VLAPIC(v)->vlapic_timer ), v->processor );
}
-/*
- * Need to use this exit to reschedule
- */
static inline void svm_vmexit_do_hlt(struct vmcb_struct *vmcb)
{
- struct vcpu *v = current;
- struct periodic_time *pt =
- &(v->domain->arch.hvm_domain.pl_time.periodic_tm);
- s_time_t next_pit = -1, next_wakeup;
-
__update_guest_eip(vmcb, 1);
- /* check for interrupt not handled or new interrupt */
- if ( vmcb->vintr.fields.irq || cpu_has_pending_irq(v) )
+ /* Check for interrupt not handled or new interrupt. */
+ if ( (vmcb->rflags & X86_EFLAGS_IF) &&
+ (vmcb->vintr.fields.irq || cpu_has_pending_irq(current)) )
return;
- /* Detect machine shutdown. Only do this for vcpu 0, to avoid
- potentially shutting down the domain early. */
- if (v->vcpu_id == 0) {
- unsigned long rflags = vmcb->rflags;
- /* If we halt with interrupts disabled, that's a pretty sure
- sign that we want to shut down. In a real processor, NMIs
- are the only way to break out of this. Our SVM code won't
- deliver interrupts, but will wake it up whenever one is
- pending... */
- if(!(rflags & X86_EFLAGS_IF)) {
- printk("D%d: HLT with interrupts enabled @0x%lx Shutting down.\n",
- current->domain->domain_id, (unsigned long)vmcb->rip);
- domain_shutdown(current->domain, SHUTDOWN_poweroff);
- return;
- }
- }
-
- if ( !v->vcpu_id )
- next_pit = get_scheduled(v, pt->irq, pt);
- next_wakeup = get_apictime_scheduled(v);
- if ( (next_pit != -1 && next_pit < next_wakeup) || next_wakeup == -1 )
- next_wakeup = next_pit;
- if ( next_wakeup != - 1 )
- set_timer(&current->arch.hvm_svm.hlt_timer, next_wakeup);
- do_sched_op_compat(SCHEDOP_block, 0);
+ hvm_hlt(vmcb->rflags);
}
if (hvm_apic_support(v->domain))
vlapic_init(v);
- init_timer(&v->arch.hvm_svm.hlt_timer,
- hlt_timer_fn, v, v->processor);
+ init_timer(&v->arch.hvm_vcpu.hlt_timer, hlt_timer_fn, v, v->processor);
vmcb->ldtr.sel = 0;
vmcb->ldtr.base = 0;
vlapic_init(v);
vmx_set_host_env(v);
- init_timer(&v->arch.hvm_vmx.hlt_timer, hlt_timer_fn, v, v->processor);
+ init_timer(&v->arch.hvm_vcpu.hlt_timer, hlt_timer_fn, v, v->processor);
error |= __vmwrite(GUEST_LDTR_SELECTOR, 0);
error |= __vmwrite(GUEST_LDTR_BASE, 0);
if ( !test_bit(_VCPUF_initialised, &v->vcpu_flags) )
continue;
free_monitor_pagetable(v);
- kill_timer(&v->arch.hvm_vmx.hlt_timer);
+ kill_timer(&v->arch.hvm_vcpu.hlt_timer);
if ( hvm_apic_support(v->domain) && (VLAPIC(v) != NULL) )
{
kill_timer(&VLAPIC(v)->vlapic_timer);
if ( pt->enabled ) {
migrate_timer(&pt->timer, v->processor);
- migrate_timer(&v->arch.hvm_vmx.hlt_timer, v->processor);
+ migrate_timer(&v->arch.hvm_vcpu.hlt_timer, v->processor);
}
if ( hvm_apic_support(v->domain) && VLAPIC(v))
migrate_timer(&(VLAPIC(v)->vlapic_timer), v->processor);
(unsigned long)regs->edx);
}
-/*
- * Need to use this exit to reschedule
- */
void vmx_vmexit_do_hlt(void)
{
- struct vcpu *v = current;
- struct periodic_time *pt =
- &(v->domain->arch.hvm_domain.pl_time.periodic_tm);
- s_time_t next_pit = -1, next_wakeup;
-
-
- /* Detect machine shutdown. Only do this for vcpu 0, to avoid
- potentially shutting down the domain early. */
- if (v->vcpu_id == 0) {
- unsigned long rflags;
-
- __vmread(GUEST_RFLAGS, &rflags);
- /* If we halt with interrupts disabled, that's a pretty sure
- sign that we want to shut down. In a real processor, NMIs
- are the only way to break out of this. Our VMX code won't
- deliver interrupts, but will wake it up whenever one is
- pending... */
- if(!(rflags & X86_EFLAGS_IF)) {
- unsigned long rip;
- __vmread(GUEST_RIP, &rip);
- printk("D%d: HLT with interrupts enabled @0x%lx Shutting down.\n",
- current->domain->domain_id, rip);
- domain_shutdown(current->domain, SHUTDOWN_poweroff);
- return;
- }
- }
-
- if ( !v->vcpu_id )
- next_pit = get_scheduled(v, pt->irq, pt);
- next_wakeup = get_apictime_scheduled(v);
- if ( (next_pit != -1 && next_pit < next_wakeup) || next_wakeup == -1 )
- next_wakeup = next_pit;
- if ( next_wakeup != - 1 )
- set_timer(&current->arch.hvm_vmx.hlt_timer, next_wakeup);
- do_sched_op_compat(SCHEDOP_block, 0);
+ unsigned long rflags;
+ __vmread(GUEST_RFLAGS, &rflags);
+ hvm_hlt(rflags);
}
static inline void vmx_vmexit_do_extint(struct cpu_user_regs *regs)
void hvm_prod_vcpu(struct vcpu *v);
+void hvm_hlt(unsigned long rflags);
+
#endif /* __ASM_X86_HVM_SUPPORT_H__ */
unsigned long cpu_cr2;
unsigned long cpu_cr3;
unsigned long cpu_state;
- struct timer hlt_timer; /* hlt ins emulation wakeup timer */
};
extern struct vmcb_struct *alloc_vmcb(void);
/* Flags */
int flag_dr_dirty;
+ /* hlt ins emulation wakeup timer */
+ struct timer hlt_timer;
+
union {
struct arch_vmx_struct vmx;
struct arch_svm_struct svm;
} u;
};
-#define ARCH_HVM_IO_WAIT 1 /* Waiting for I/O completion */
+#define ARCH_HVM_IO_WAIT 1 /* Waiting for I/O completion */
-#define HVM_CONTEXT_STACK_BYTES (offsetof(struct cpu_user_regs, error_code))
+#define HVM_CONTEXT_STACK_BYTES (offsetof(struct cpu_user_regs, error_code))
#endif /* __ASM_X86_HVM_VCPU_H__ */
unsigned long cpu_based_exec_control;
struct vmx_msr_state msr_content;
void *io_bitmap_a, *io_bitmap_b;
- struct timer hlt_timer; /* hlt ins emulation wakeup timer */
};
#define vmx_schedule_tail(next) \